MODELO RNN#
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense, Dropout
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
from statsmodels.stats.diagnostic import acorr_ljungbox
import matplotlib.pyplot as plt
import statsmodels.api as sm
from math import sqrt
import plotly.io as pio
import plotly.express as px
import plotly.offline as py
pio.renderers.default = "notebook"
Datos#
# Load the raw Bitcoin daily price/volume history (newest rows first in the CSV)
data = pd.read_csv(r"C:\Users\claud\Downloads\Bitcoin Historical Data.csv")
# Notebook cell echo: display the raw frame
data
| Date | Price | Open | High | Low | Vol. | Change % | |
|---|---|---|---|---|---|---|---|
| 0 | 03/24/2024 | 67,211.9 | 64,036.5 | 67,587.8 | 63,812.9 | 65.59K | 4.96% |
| 1 | 03/23/2024 | 64,037.8 | 63,785.6 | 65,972.4 | 63,074.9 | 35.11K | 0.40% |
| 2 | 03/22/2024 | 63,785.5 | 65,501.5 | 66,633.3 | 62,328.3 | 72.43K | -2.62% |
| 3 | 03/21/2024 | 65,503.8 | 67,860.0 | 68,161.7 | 64,616.1 | 75.26K | -3.46% |
| 4 | 03/20/2024 | 67,854.0 | 62,046.8 | 68,029.5 | 60,850.9 | 133.53K | 9.35% |
| ... | ... | ... | ... | ... | ... | ... | ... |
| 4994 | 07/22/2010 | 0.1 | 0.1 | 0.1 | 0.1 | 2.16K | 0.00% |
| 4995 | 07/21/2010 | 0.1 | 0.1 | 0.1 | 0.1 | 0.58K | 0.00% |
| 4996 | 07/20/2010 | 0.1 | 0.1 | 0.1 | 0.1 | 0.26K | 0.00% |
| 4997 | 07/19/2010 | 0.1 | 0.1 | 0.1 | 0.1 | 0.57K | 0.00% |
| 4998 | 07/18/2010 | 0.1 | 0.0 | 0.1 | 0.1 | 0.08K | 0.00% |
4999 rows × 7 columns
Para que los datos estén en el formato correcto se les aplicará el siguiente proceso de eliminación de formatos
# Remove thousands separators and convert the price columns to float.
# Bugfix: 'Low' was previously left as a string while Price/Open/High were
# converted; it is now cleaned for consistency (same "1,234.5" format).
for _price_col in ['Price', 'Open', 'High', 'Low']:
    data[_price_col] = data[_price_col].str.replace(',', '').astype(float)
# Convert a volume value with 'K'/'M'/'B' suffix (or a plain number) to a float
def convert_volume(value):
    """Convert a volume entry like '65.59K', '2.5M' or '1,234' to a float.

    Numeric inputs (including NaN for missing volumes) are returned
    unchanged, so the function is safe to re-apply to an already
    converted column.
    """
    # Already numeric (e.g. NaN from missing data, or a prior conversion)
    if isinstance(value, (int, float)):
        return value
    # Bugfix: strip thousands separators FIRST, so values such as
    # '1,234.5K' no longer raise ValueError inside the suffix branches.
    text = value.replace(',', '')
    for suffix, multiplier in (('K', 1_000), ('M', 1_000_000), ('B', 1_000_000_000)):
        if suffix in text:
            return float(text.replace(suffix, '')) * multiplier
    return float(text)
# Apply the converter to the 'Vol.' column (strings like '65.59K' -> floats)
data['Vol.'] = data['Vol.'].apply(convert_volume)
# Sanity check: confirm the conversion succeeded
print(data[['Vol.']].head())
Vol.
0 65590.0
1 35110.0
2 72430.0
3 75260.0
4 133530.0
Se verifica el formato de la fecha y se agrega la columna índice
# Parse the "Date" column (MM/DD/YYYY strings) into datetime
data['Date'] = pd.to_datetime(data['Date'], format='%m/%d/%Y')
# Sort chronologically (the CSV arrives newest-first) and rebuild the row index
data = data.sort_values(by='Date', ascending=True).reset_index(drop=True)
# Count missing values per column
print(data.isnull().sum())
# Alternative view: show the rows that contain any missing value
missing_data = data[data.isnull().any(axis=1)]
print(f"Filas con datos faltantes:\n{missing_data}")
# Add a simple 0..n-1 positional index column for later use
data['indice'] = range(len(data))
Date 0
Price 0
Open 0
High 0
Low 0
Vol. 6
Change % 0
dtype: int64
Filas con datos faltantes:
Date Price Open High Low Vol. Change %
337 2011-06-20 17.5 17.5 17.5 17.5 NaN 0.00%
338 2011-06-21 17.5 17.5 17.5 17.5 NaN 0.00%
339 2011-06-22 17.5 17.5 17.5 17.5 NaN 0.00%
340 2011-06-23 17.5 17.5 17.5 17.5 NaN 0.00%
341 2011-06-24 17.5 17.5 17.5 17.5 NaN 0.00%
342 2011-06-25 17.5 17.5 17.5 17.5 NaN 0.00%
Funciones#
Ventanas deslizantes#
Para esto creamos una función que divide los datos en entrenamiento, validación y prueba. Esta función avanza con un paso de 1. Las ventanas y el horizonte de predicción son variables.
import numpy as np
def create_sliding_windows(data, window_size, horizon):
    """Build train/validation/test sliding windows from a 1-D series.

    The window advances one step at a time.  For each start position ``i``:
      * train input = data[i : i+window_size], target = next ``horizon`` points
      * val input   = the train target,        target = following ``horizon`` points
      * test input  = the val target,          target = following ``horizon`` points

    Parameters
    ----------
    data : 1-D sequence (np.ndarray or positionally-sliceable pd.Series).
    window_size : int, length of each training input window.
    horizon : int, number of steps ahead to predict.

    Returns
    -------
    X_train, Y_train, X_val, Y_val, X_test, Y_test where X_train has shape
    (n_samples, window_size, 1), X_val/X_test have shape
    (n_samples, horizon, 1) and each Y_* has shape (n_samples, horizon).
    """
    X_train, Y_train = [], []
    X_val, Y_val = [], []
    X_test, Y_test = [], []
    # Last valid start leaves room for one input window plus three
    # consecutive horizons (train, validation and test targets).
    for i in range(0, len(data) - window_size - 3 * horizon + 1, 1):
        # Training pair
        X_train.append(data[i:i + window_size])
        Y_train.append(data[i + window_size:i + window_size + horizon])
        # Validation pair (input = training target)
        X_val.append(data[i + window_size:i + window_size + horizon])
        Y_val.append(data[i + window_size + horizon:i + window_size + 2 * horizon])
        # Test pair (input = validation target)
        X_test.append(data[i + window_size + horizon:i + window_size + 2 * horizon])
        Y_test.append(data[i + window_size + 2 * horizon:i + window_size + 3 * horizon])
    # Shape the inputs as (n_samples, timesteps, n_features=1).
    # Bugfix: X_val / X_test windows are `horizon` timesteps long, not
    # `window_size`; reshaping them with `window_size` raised ValueError
    # whenever window_size != horizon.
    X_train = np.array(X_train).reshape(-1, window_size, 1)
    X_val = np.array(X_val).reshape(-1, horizon, 1)
    X_test = np.array(X_test).reshape(-1, horizon, 1)
    Y_train = np.array(Y_train)  # (n_samples, horizon)
    Y_val = np.array(Y_val)
    Y_test = np.array(Y_test)
    return X_train, Y_train, X_val, Y_val, X_test, Y_test
Construcción del Modelo#
Creamos una función para crear el modelo teniendo en cuenta los parámetros de las iteraciones: neurons, window, dropout_rate y horizon. Batch size se aplicará más adelante.
from tensorflow.keras.layers import SimpleRNN, Dense, Dropout, Input
def build_rnn_model(neurons, window, dropout_rate, horizon):
    """Assemble and compile a single-layer SimpleRNN forecaster.

    neurons      -- units in the recurrent layer
    window       -- input timesteps; the model expects shape (window, 1)
    dropout_rate -- dropout applied after the recurrent layer
    horizon      -- number of future steps produced by the output layer
    """
    model = Sequential([
        Input(shape=(window, 1)),
        SimpleRNN(neurons, activation='relu'),
        Dropout(dropout_rate),
        Dense(horizon),
    ])
    # Adam + plain MSE loss, matching the metric used by the grid search
    model.compile(optimizer='adam', loss='mse')
    return model
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.tsa.stattools import q_stat, acf
import statsmodels.api as sm
from scipy.stats import shapiro
Métricas#
Creamos una función que calcule las metricas importantes
def metricas(windows, pred, neurons, batchsize, dropout_rate, horizon, window):
    """Return a dict of error metrics for `pred` against the truth `windows`.

    The hyper-parameters (neurons, batchsize, dropout_rate, horizon, window)
    are echoed into the dict so rows from many runs can be compared later.
    Returns None (after printing a warning) when `pred` contains NaN, since
    the metrics would be meaningless in that case.
    """
    # Guard clause: NaN predictions make every metric undefined
    if np.isnan(pred).any():
        print("Advertencia: Las predicciones contienen NaN.")
        return None
    mse = mean_squared_error(windows, pred)
    return {
        'neurons': neurons,
        'batchsize': batchsize,
        'Dropout': dropout_rate,
        'horizon': horizon,
        'window': window,
        # Hand-rolled percentage error; assumes true values are never zero
        'MAPE': np.mean(np.abs((windows - pred) / windows)) * 100,
        'MAE': mean_absolute_error(windows, pred),
        'MSE': mse,
        'RMSE': np.sqrt(mse),
        'R2': r2_score(windows, pred),
    }
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot
from statsmodels.tsa.stattools import acf, q_stat
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import plotly.express as px
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
Función de optimización#
Se creó una función con la cual se itera entre todas las posibles combinaciones de parámetros. Se tiene en cuenta el early stopping y la estabilización de la métrica MSE para encontrar el mejor modelo: luego de que el MSE no varíe más del 5% durante 5 iteraciones seguidas, la búsqueda se detiene. Además se guardan los residuos y métricas de cada modelo para su posterior uso.
def busqueda(datos, windows, horizons, dropout_rates, batchsizes, neurons_list):
    """Grid-search RNN hyper-parameters over the series `datos`.

    Only combinations with window == horizon are trained: the sliding-window
    scheme feeds each split's targets as the next split's inputs, which
    requires equal lengths.  For every trained model the validation, test and
    train metrics plus the validation residuals are recorded; the model with
    the lowest validation MSE is kept as the best one.  The search stops
    early once the validation MSE fluctuates by less than 5% for 5
    consecutive models.

    Fixes vs. the previous version: the dead EarlyStopping created before
    the loops (immediately shadowed by the inner one) was removed, and the
    `previous_mse` check now uses `is not None` so an MSE of exactly 0.0
    would not be mistaken for "no previous value".

    Returns
    -------
    (validation_metrics, test_metrics, train_metrics, residual_hist,
     best_model, params_best_model, residuals_bestmodel)
    """
    validation_metrics = []
    test_metrics = []
    train_metrics = []
    residual_hist = []
    best_model = None
    params_best_model = {}
    best_score = np.inf
    previous_mse = None        # MSE of the previously evaluated combination
    residuals_bestmodel = None
    stable_iterations = 0      # consecutive runs with <5% MSE fluctuation
    for neurons in neurons_list:
        for window in windows:
            for horizon in horizons:
                # The window scheme only lines up when window == horizon
                if window == horizon:
                    x_train, y_train, x_val, y_val, x_test, y_test = create_sliding_windows(
                        datos, window, horizon)
                    for dropout_rate in dropout_rates:
                        for batchsize in batchsizes:
                            # Build and train this combination's model
                            print(f"Entrenando modelo con {neurons} neuronas, dropout {dropout_rate}, "
                                  f"batch size {batchsize}, horizon {horizon}, window {window}")
                            model = build_rnn_model(neurons, window, dropout_rate, horizon)
                            # NOTE: with epochs=1 this callback can never fire;
                            # it is kept so raising `epochs` later works as intended.
                            early_stop = EarlyStopping(
                                monitor='val_loss', patience=2,
                                restore_best_weights=True, verbose=1)
                            model.fit(
                                x_train, y_train,
                                validation_data=(x_val, y_val),
                                callbacks=[early_stop],
                                epochs=1, batch_size=batchsize, verbose=0
                            )
                            # Predictions on each split; residuals come from validation
                            val_pred = model.predict(x_val)
                            test_pred = model.predict(x_test)
                            train_pred = model.predict(x_train)
                            residuals = y_val - val_pred
                            residual_hist.append({
                                'neurons': neurons,
                                'batchsize': batchsize,
                                'dropout_rate': dropout_rate,
                                'horizon': horizon,
                                'window': window,
                                'residuals': residuals
                            })
                            # Metrics on validation / test / train splits
                            metricas_val = metricas(y_val, val_pred, neurons, batchsize, dropout_rate, horizon, window)
                            validation_metrics.append(metricas_val)
                            metricas_test = metricas(y_test, test_pred, neurons, batchsize, dropout_rate, horizon, window)
                            test_metrics.append(metricas_test)
                            metricas_train = metricas(y_train, train_pred, neurons, batchsize, dropout_rate, horizon, window)
                            train_metrics.append(metricas_train)
                            # metricas() returns None when predictions were NaN
                            if metricas_val:
                                mse = metricas_val['MSE']
                                print(f"MSE: {mse}")
                                # Track MSE fluctuation across consecutive runs
                                if previous_mse is not None:
                                    variation = abs(mse - previous_mse) / previous_mse
                                    if variation < 0.05:
                                        stable_iterations += 1
                                        print(f"Fluctuación menor al 5% en el MSE por {stable_iterations} iteración(es).")
                                    else:
                                        stable_iterations = 0  # reset on >=5% variation
                                    if stable_iterations >= 5:
                                        print("Fluctuación menor al 5% durante 5 iteraciones consecutivas. Deteniendo la búsqueda.")
                                        return (validation_metrics, test_metrics, train_metrics,
                                                residual_hist, best_model, params_best_model, residuals_bestmodel)
                                previous_mse = mse
                                # Keep the model with the lowest validation MSE so far
                                if mse < best_score:
                                    best_score = mse
                                    best_model = model
                                    params_best_model = {
                                        'neurons': neurons,
                                        'batchsize': batchsize,
                                        'dropout_rate': dropout_rate,
                                        'horizon': horizon,
                                        'window': window,
                                        'MSE': mse
                                    }
                                    residuals_bestmodel = residuals
    return (validation_metrics, test_metrics, train_metrics, residual_hist, best_model, params_best_model, residuals_bestmodel)
Serie Price#
Utilización de las funciones para encontrar los mejores parámetros para la serie de tiempo Price.
# Hyper-parameter grid for the 'Price' series search (first sweep: 10 neurons)
windows=[7,14,21,28]
horizons=[7,14,21,28]
dropout_rates=[0.2,0.4,0.6,0.8]
batchsizes=[16,32,64,128]
neurons_list=[10]
datos=data['Price']
# Run the grid search and unpack every artifact it returns
validation_metrics_price,test_metrics_price,train_metrics_price,residual_hist_price,best_model_price,params_best_model_price,residuals_bestmodel_price=busqueda (datos,windows,horizons,dropout_rates,batchsizes,neurons_list)
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 16, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 982us/step
156/156 [==============================] - 0s 967us/step
MSE: 12452252.273351375
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 32, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 941us/step
156/156 [==============================] - 0s 971us/step
MSE: 103339869.10436554
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 64, horizon 7, window 7
156/156 [==============================] - 0s 2ms/step
156/156 [==============================] - 0s 972us/step
156/156 [==============================] - 0s 1ms/step
MSE: 331992502.26729834
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 128, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 946us/step
156/156 [==============================] - 0s 1ms/step
MSE: 506037030.78467923
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 16, horizon 7, window 7
156/156 [==============================] - 0s 986us/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 29711093.427964874
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 32, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 873us/step
156/156 [==============================] - 0s 923us/step
MSE: 311592733.408696
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 64, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 337853068.50400215
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 128, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 999us/step
MSE: 684663553.5457963
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 16, horizon 7, window 7
156/156 [==============================] - 0s 978us/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 2ms/step
MSE: 101947489.06413133
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 32, horizon 7, window 7
156/156 [==============================] - 0s 993us/step
156/156 [==============================] - 0s 860us/step
156/156 [==============================] - 0s 1ms/step
MSE: 343594422.68238723
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 64, horizon 7, window 7
156/156 [==============================] - 0s 2ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 265200666.2155233
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 128, horizon 7, window 7
156/156 [==============================] - 0s 932us/step
156/156 [==============================] - 0s 985us/step
156/156 [==============================] - 0s 1ms/step
MSE: 496670228.78050506
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 16, horizon 7, window 7
156/156 [==============================] - 0s 2ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 2ms/step
MSE: 228219984.23652956
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 32, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 270549977.00925046
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 64, horizon 7, window 7
156/156 [==============================] - 0s 2ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 376873927.30295175
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 128, horizon 7, window 7
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
156/156 [==============================] - 0s 1ms/step
MSE: 497651566.2716198
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 16, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
MSE: 17859420.4087967
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 32, horizon 14, window 14
155/155 [==============================] - 1s 2ms/step
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 270036687.03612465
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 64, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 176727765.3053069
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 128, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 286342836.4258379
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 16, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
MSE: 115117603.55270602
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 32, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 280570994.4851838
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 64, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 320689169.2540055
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 128, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 302791932.69403464
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 16, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 219449290.4331821
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 32, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 290332139.29967797
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 64, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 345399588.17814314
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 128, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 325130118.66766876
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 16, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 178392852.30478185
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 32, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
MSE: 232325207.71717525
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 64, horizon 14, window 14
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
MSE: 394306214.92552096
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 128, horizon 14, window 14
155/155 [==============================] - 0s 1ms/step
155/155 [==============================] - 0s 2ms/step
155/155 [==============================] - 0s 1ms/step
MSE: 331074856.15727603
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 16, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 21052973.958161116
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 32, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 53939785.29199177
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 64, horizon 21, window 21
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 163904706.56376624
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 128, horizon 21, window 21
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 484451102.243072
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 16, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 43176791.38989697
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 32, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 69012334.93881208
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 64, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 230664271.77816445
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 128, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 260952081.13927358
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 16, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 116040378.90029314
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 32, horizon 21, window 21
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 203071950.83654052
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 64, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 1ms/step
MSE: 272189373.6644945
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 128, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 1ms/step
154/154 [==============================] - 0s 1ms/step
MSE: 315139656.2349957
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 16, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 1ms/step
MSE: 216854519.02884093
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 32, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 3ms/step
MSE: 327030307.95526236
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 64, horizon 21, window 21
154/154 [==============================] - 1s 3ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 287183954.22054756
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 128, horizon 21, window 21
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
154/154 [==============================] - 0s 2ms/step
MSE: 304752695.60864
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 16, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 20197233.507501774
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 32, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 77843563.17343508
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 64, horizon 28, window 28
153/153 [==============================] - 1s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 313883303.3728267
Entrenando modelo con 10 neuronas, dropout 0.2, batch size 128, horizon 28, window 28
153/153 [==============================] - 1s 3ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 311423696.81561404
Fluctuación menor al 5% en el MSE por 1 iteración(es).
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 16, horizon 28, window 28
153/153 [==============================] - 1s 2ms/step
153/153 [==============================] - 0s 3ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 42205416.36218546
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 32, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 3ms/step
153/153 [==============================] - 0s 3ms/step
MSE: 142743446.55582348
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 64, horizon 28, window 28
153/153 [==============================] - 1s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 242010678.43303877
Entrenando modelo con 10 neuronas, dropout 0.4, batch size 128, horizon 28, window 28
153/153 [==============================] - 1s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 313277427.60531324
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 16, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 58754945.48151473
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 32, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 277038417.1121744
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 64, horizon 28, window 28
153/153 [==============================] - 1s 3ms/step
153/153 [==============================] - 0s 3ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 271491228.9115111
Fluctuación menor al 5% en el MSE por 1 iteración(es).
Entrenando modelo con 10 neuronas, dropout 0.6, batch size 128, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 302243832.30486196
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 16, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 195916364.60108823
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 32, horizon 28, window 28
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 312307323.632018
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 64, horizon 28, window 28
153/153 [==============================] - 1s 2ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 307620163.8020115
Fluctuación menor al 5% en el MSE por 1 iteración(es).
Entrenando modelo con 10 neuronas, dropout 0.8, batch size 128, horizon 28, window 28
153/153 [==============================] - 1s 3ms/step
153/153 [==============================] - 0s 2ms/step
153/153 [==============================] - 0s 2ms/step
MSE: 334658737.2830239
Para evitar el desgaste hemos dividido las iteraciones; empezaremos con 10 neuronas. Según la optimización, los mejores parámetros son:
# Notebook cell echo: best hyper-parameters found in the 10-neuron sweep
params_best_model_price
{'neurons': 10,
'batchsize': 16,
'dropout_rate': 0.2,
'horizon': 7,
'window': 7,
'MSE': 12452252.273351375}
Guardaremos los datos relevantes de estas iteraciones, y luego haremos iteraciones con 1000 neuronas
def guardar_resultados(
    validation_metrics_price, test_metrics_price, train_metrics_price,
    residual_hist_price, best_model_price, params_best_model_price,
    residuals_bestmodel_price,
    resultados
):
    """Append one search run's artifacts to the accumulator dict.

    Each argument is appended to the list stored under the matching key of
    ``resultados`` (mutated in place).  Raises ``KeyError`` if ``resultados``
    is missing any of the expected keys.
    """
    # Map each accumulator key to the value produced by this run, then append
    # in one loop instead of seven hand-written .append() lines.
    aportes = {
        'validation_metrics_price': validation_metrics_price,
        'test_metrics_price': test_metrics_price,
        'train_metrics_price': train_metrics_price,
        'residual_hist_price': residual_hist_price,
        'best_model_price': best_model_price,
        'params_best_model_price': params_best_model_price,
        'residuals_bestmodel_price': residuals_bestmodel_price,
    }
    for clave, valor in aportes.items():
        resultados[clave].append(valor)
# Fresh accumulator: one empty list per kind of artifact kept across runs.
CLAVES_RESULTADOS = (
    'validation_metrics_price',
    'test_metrics_price',
    'train_metrics_price',
    'residual_hist_price',
    'best_model_price',
    'params_best_model_price',
    'residuals_bestmodel_price',
)
resultados = {clave: [] for clave in CLAVES_RESULTADOS}

# Archive the outputs of the 10-neuron search round.
guardar_resultados(
    validation_metrics_price, test_metrics_price, train_metrics_price,
    residual_hist_price, best_model_price, params_best_model_price,
    residuals_bestmodel_price, resultados,
)
# Grid for the second search round: same windows/horizons/dropouts/batches,
# but 1000 neurons per layer.
windows = [7, 14, 21, 28]
horizons = [7, 14, 21, 28]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
batchsizes = [16, 32, 64, 128]
neurons_list = [1000]
datos = data['Price']

# Run the grid search and unpack its per-split metrics, residual history,
# best model and best hyperparameters.
(validation_metrics_price, test_metrics_price, train_metrics_price,
 residual_hist_price, best_model_price, params_best_model_price,
 residuals_bestmodel_price) = busqueda(
    datos, windows, horizons, dropout_rates, batchsizes, neurons_list)
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 16, horizon 7, window 7
156/156 [==============================] - 3s 17ms/step
156/156 [==============================] - 2s 16ms/step
156/156 [==============================] - 2s 14ms/step
MSE: 3766284.5581906224
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 32, horizon 7, window 7
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 3s 19ms/step
MSE: 5226223.713095929
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 64, horizon 7, window 7
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 15ms/step
MSE: 2889732.349310062
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 128, horizon 7, window 7
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 3s 17ms/step
156/156 [==============================] - 3s 20ms/step
MSE: 3688648.6686281827
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 16, horizon 7, window 7
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 15ms/step
MSE: 6608623.897730597
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 32, horizon 7, window 7
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 14ms/step
MSE: 2892794.2261356013
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 64, horizon 7, window 7
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 14ms/step
MSE: 4406460.824957201
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 128, horizon 7, window 7
156/156 [==============================] - 2s 15ms/step
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 14ms/step
MSE: 6382008.478017636
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 16, horizon 7, window 7
156/156 [==============================] - 2s 13ms/step
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 13ms/step
MSE: 2875117.330187585
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 32, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 13ms/step
156/156 [==============================] - 2s 13ms/step
MSE: 3663291.91944888
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 64, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 13ms/step
156/156 [==============================] - 2s 12ms/step
MSE: 3019048.7917254753
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 128, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 13ms/step
MSE: 9218297.53065125
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 16, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
MSE: 2393739.836301642
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 32, horizon 7, window 7
156/156 [==============================] - 2s 13ms/step
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
MSE: 14683480.634844368
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 64, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
MSE: 6157901.491640183
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 128, horizon 7, window 7
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
156/156 [==============================] - 2s 12ms/step
MSE: 5780940.052257128
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 16, horizon 14, window 14
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 5111021.563363264
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 32, horizon 14, window 14
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 4766181.186866341
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 64, horizon 14, window 14
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 5140312.215198747
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 128, horizon 14, window 14
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 13523950.80094181
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 16, horizon 14, window 14
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 6555161.865820982
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 32, horizon 14, window 14
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 7588417.960221002
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 64, horizon 14, window 14
155/155 [==============================] - 4s 22ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 5937109.220373919
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 128, horizon 14, window 14
155/155 [==============================] - 4s 22ms/step
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 23ms/step
MSE: 9473510.596641678
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 16, horizon 14, window 14
155/155 [==============================] - 4s 22ms/step
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 8020482.686880559
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 32, horizon 14, window 14
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 4019420.4542349363
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 64, horizon 14, window 14
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 5334285.532509334
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 128, horizon 14, window 14
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 26ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 6002418.261903864
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 16, horizon 14, window 14
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 10597908.024123814
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 32, horizon 14, window 14
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 26ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 6954765.814738643
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 64, horizon 14, window 14
155/155 [==============================] - 4s 24ms/step
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 25ms/step
MSE: 8638621.53203569
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 128, horizon 14, window 14
155/155 [==============================] - 4s 23ms/step
155/155 [==============================] - 4s 25ms/step
155/155 [==============================] - 4s 24ms/step
MSE: 6008457.718344638
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 16, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 37ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 9540319.664807212
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 32, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 38ms/step
154/154 [==============================] - 6s 37ms/step
MSE: 7163518.133899264
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 64, horizon 21, window 21
154/154 [==============================] - 6s 37ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 37ms/step
MSE: 6596269.958426367
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 128, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 37ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 11572298.96581625
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 16, horizon 21, window 21
154/154 [==============================] - 6s 35ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 7992018.720861162
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 32, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 5s 36ms/step
MSE: 7072933.842670511
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 64, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 5s 35ms/step
154/154 [==============================] - 6s 37ms/step
MSE: 22413759.973742053
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 128, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 5s 35ms/step
MSE: 136397086.86302027
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 16, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 37ms/step
154/154 [==============================] - 6s 37ms/step
MSE: 14024751.98066872
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 32, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 10383489.69036609
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 64, horizon 21, window 21
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 5749707.507838933
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 128, horizon 21, window 21
154/154 [==============================] - 6s 37ms/step
154/154 [==============================] - 6s 36ms/step
154/154 [==============================] - 6s 36ms/step
MSE: 109782057.20907259
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 16, horizon 21, window 21
154/154 [==============================] - 7s 44ms/step
154/154 [==============================] - 7s 48ms/step
154/154 [==============================] - 6s 37ms/step
MSE: 77629742.22661093
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 32, horizon 21, window 21
154/154 [==============================] - 5s 34ms/step
154/154 [==============================] - 5s 34ms/step
154/154 [==============================] - 5s 34ms/step
MSE: 5746317.040690839
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 64, horizon 21, window 21
154/154 [==============================] - 5s 34ms/step
154/154 [==============================] - 5s 35ms/step
154/154 [==============================] - 5s 34ms/step
MSE: 7486563.556564862
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 128, horizon 21, window 21
154/154 [==============================] - 6s 35ms/step
154/154 [==============================] - 5s 35ms/step
154/154 [==============================] - 5s 35ms/step
MSE: 7526546.8792797495
Fluctuación menor al 5% en el MSE por 1 iteración(es).
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 16, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 8s 49ms/step
153/153 [==============================] - 7s 48ms/step
MSE: 11843297.985535605
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 32, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 48ms/step
153/153 [==============================] - 7s 48ms/step
MSE: 8829760.176670508
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 64, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 8s 49ms/step
153/153 [==============================] - 7s 48ms/step
MSE: 9820214.265199494
Entrenando modelo con 1000 neuronas, dropout 0.2, batch size 128, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 48ms/step
153/153 [==============================] - 8s 49ms/step
MSE: 136384024.4355501
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 16, horizon 28, window 28
153/153 [==============================] - 7s 46ms/step
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 47ms/step
MSE: 7634057.904417909
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 32, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 48ms/step
MSE: 10006925.963036377
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 64, horizon 28, window 28
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 47ms/step
153/153 [==============================] - 7s 46ms/step
MSE: 7470894.798155763
Entrenando modelo con 1000 neuronas, dropout 0.4, batch size 128, horizon 28, window 28
153/153 [==============================] - 7s 45ms/step
153/153 [==============================] - 7s 46ms/step
153/153 [==============================] - 7s 47ms/step
MSE: 166183983.1911531
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 16, horizon 28, window 28
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 46ms/step
153/153 [==============================] - 7s 45ms/step
MSE: 9656501.563318912
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 32, horizon 28, window 28
153/153 [==============================] - 7s 42ms/step
153/153 [==============================] - 7s 43ms/step
153/153 [==============================] - 7s 44ms/step
MSE: 7311877.690503958
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 64, horizon 28, window 28
153/153 [==============================] - 7s 43ms/step
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 44ms/step
MSE: 16738557.549434375
Entrenando modelo con 1000 neuronas, dropout 0.6, batch size 128, horizon 28, window 28
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 44ms/step
MSE: 183074321.56474274
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 16, horizon 28, window 28
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 45ms/step
153/153 [==============================] - 7s 44ms/step
MSE: 12575878.946573868
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 32, horizon 28, window 28
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 44ms/step
153/153 [==============================] - 7s 44ms/step
MSE: 7222315.96569903
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 64, horizon 28, window 28
153/153 [==============================] - 7s 45ms/step
153/153 [==============================] - 7s 45ms/step
153/153 [==============================] - 7s 46ms/step
MSE: 9845789.246999932
Entrenando modelo con 1000 neuronas, dropout 0.8, batch size 128, horizon 28, window 28
153/153 [==============================] - 7s 45ms/step
153/153 [==============================] - 7s 46ms/step
153/153 [==============================] - 7s 45ms/step
MSE: 11781548.156002915
# Report this round's winning configuration, then archive the run's outputs.
print(params_best_model_price)
guardar_resultados(
    validation_metrics_price, test_metrics_price, train_metrics_price,
    residual_hist_price, best_model_price, params_best_model_price,
    residuals_bestmodel_price, resultados,
)
{'neurons': 1000, 'batchsize': 16, 'dropout_rate': 0.8, 'horizon': 7, 'window': 7, 'MSE': 2393739.836301642}
De esta iteración podemos ver que con un mayor número de neuronas se logró un mejor resultado del MSE. Además, coinciden el tamaño de las ventanas y el batch size, por lo que para las siguientes iteraciones utilizaremos los siguientes parámetros:
# Final search round: fix window/horizon/batch to the values that performed
# best so far and try 10000 neurons with two dropout levels.
windows = [7]
horizons = [7]
dropout_rates = [0.2, 0.6]
batchsizes = [16]
neurons_list = [10000]
datos = data['Price']

(validation_metrics_price, test_metrics_price, train_metrics_price,
 residual_hist_price, best_model_price, params_best_model_price,
 residuals_bestmodel_price) = busqueda(
    datos, windows, horizons, dropout_rates, batchsizes, neurons_list)
Entrenando modelo con 10000 neuronas, dropout 0.2, batch size 16, horizon 7, window 7
156/156 [==============================] - 118s 756ms/step
156/156 [==============================] - 118s 754ms/step
156/156 [==============================] - 118s 759ms/step
MSE: 14634695.23107263
Entrenando modelo con 10000 neuronas, dropout 0.6, batch size 16, horizon 7, window 7
156/156 [==============================] - 117s 751ms/step
156/156 [==============================] - 118s 757ms/step
156/156 [==============================] - 117s 753ms/step
MSE: 14263237.373552281
Fluctuación menor al 5% en el MSE por 1 iteración(es).
# Report the 10000-neuron round's winning configuration and archive its outputs.
print(params_best_model_price)
guardar_resultados(
    validation_metrics_price, test_metrics_price, train_metrics_price,
    residual_hist_price, best_model_price, params_best_model_price,
    residuals_bestmodel_price, resultados,
)
{'neurons': 10000, 'batchsize': 16, 'dropout_rate': 0.6, 'horizon': 7, 'window': 7, 'MSE': 14263237.373552281}
# Display the best hyperparameter set of every archived search round
# (one dict per call to guardar_resultados).
resultados['params_best_model_price']
[{'neurons': 10,
'batchsize': 16,
'dropout_rate': 0.2,
'horizon': 7,
'window': 7,
'MSE': 12452252.273351375},
{'neurons': 1000,
'batchsize': 16,
'dropout_rate': 0.8,
'horizon': 7,
'window': 7,
'MSE': 2393739.836301642},
{'neurons': 1000,
'batchsize': 16,
'dropout_rate': 0.8,
'horizon': 7,
'window': 7,
'MSE': 2393739.836301642},
{'neurons': 10000,
'batchsize': 16,
'dropout_rate': 0.6,
'horizon': 7,
'window': 7,
'MSE': 14263237.373552281}]
De las iteraciones podemos determinar que el mejor modelo es {‘neurons’: 1000, ‘batchsize’: 16, ‘dropout_rate’: 0.8, ‘horizon’: 7, ‘window’: 7, ‘MSE’: 2393739.836301642},
import pandas as pd
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.graphics.gofplots import qqplot
from statsmodels.tsa.stattools import acf
import matplotlib.pyplot as plt
import seaborn as sns
Residuales#
Tomamos los residuos del mejor modelo para su evaluación y analizamos los residuales de cada ventana; además, se mostrarán las gráficas para 5 ventanas aleatorias.
# Residuals of the best model (run index 1 → the 1000-neuron search round).
res = resultados['residuals_bestmodel_price'][1]

# Ljung-Box p-value at lag 6 for each prediction window's residuals:
# small p-values would indicate autocorrelated (non-independent) residuals.
ljung_box_pvalues = [
    acorr_ljungbox(np.array(ventana), lags=[6], return_df=True)['lb_pvalue'].values[0]
    for ventana in res
]

# One row per window with its Ljung-Box p-value.
df_resultados = pd.DataFrame({
    'Ventana': range(1, len(res) + 1),
    'LJung-Box (p-value)': ljung_box_pvalues
})
print(df_resultados.head(10))
# Pick a random sample of 5 windows to plot (series, QQ-plot and ACF each).
muestras = np.random.choice(len(res), size=5, replace=False)
for idx in muestras:
    ventana = np.array(res[idx])
    # Raw residual series for this window.
    plt.figure(figsize=(4, 4))
    plt.plot(ventana, marker='o')
    plt.title(f'Serie de Residuos - Ventana {idx + 1}')
    plt.xlabel('Índice')
    plt.ylabel('Residuos')
    plt.show()
    # QQ-plot against a fitted normal to eyeball normality.
    qqplot(ventana, line='s')
    plt.title(f'QQPlot - Ventana {idx + 1}')
    plt.show()
    # Autocorrelation of the residuals; nlags capped by the window length.
    acf_vals = acf(ventana, nlags=min(10, len(ventana) - 1))
    plt.figure(figsize=(4, 4))
    # FIX: dropped use_line_collection=True — it was deprecated in
    # Matplotlib 3.6 (it is the default behaviour) and triggered the
    # MatplotlibDeprecationWarning visible in this cell's output.
    plt.stem(range(len(acf_vals)), acf_vals)
    plt.title(f'ACF de Residuos - Ventana {idx + 1}')
    plt.xlabel('Lags')
    plt.ylabel('Autocorrelación')
    plt.show()
Ventana LJung-Box (p-value)
0 1 0.676734
1 2 0.676734
2 3 0.676734
3 4 0.676734
4 5 0.676734
5 6 0.676734
6 7 0.676734
7 8 0.676734
8 9 0.676734
9 10 0.676734
C:\Users\claud\AppData\Local\Temp\ipykernel_30548\1560686776.py:34: MatplotlibDeprecationWarning:
The 'use_line_collection' parameter of stem() was deprecated in Matplotlib 3.6 and will be removed two minor releases later. If any parameter follows 'use_line_collection', they should be passed as keyword, not positionally.
De los residuales podemos decir que no cumplen los supuestos: no distribuyen normal, presentan asimetría y, además, no son independientes.
metrics_list = resultados['validation_metrics_price']  # one sub-list of metric dicts per search run
# Flatten the nested per-run metrics into a single list of records,
# then build a deduplicated table (repeated runs were archived twice).
metricas = [registro for corrida in metrics_list for registro in corrida]
metricasval = pd.DataFrame(metricas)
metricasval = metricasval.drop_duplicates()
metricasval
| neurons | batchsize | Dropout | horizon | window | MAPE | MAE | MSE | RMSE | R2 | |
|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 10 | 16 | 0.2 | 7 | 7 | 20.825319 | 1538.285978 | 1.245225e+07 | 3528.774897 | 0.949479 |
| 1 | 10 | 32 | 0.2 | 7 | 7 | 48.715132 | 4561.517858 | 1.033399e+08 | 10165.621924 | 0.580599 |
| 2 | 10 | 64 | 0.2 | 7 | 7 | 93.333213 | 10254.999115 | 3.319925e+08 | 18220.661411 | -0.342755 |
| 3 | 10 | 128 | 0.2 | 7 | 7 | 106.328910 | 11504.183175 | 5.060370e+08 | 22495.266853 | -1.047983 |
| 4 | 10 | 16 | 0.4 | 7 | 7 | 28.534614 | 2721.015104 | 2.971109e+07 | 5450.788331 | 0.880185 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 125 | 1000 | 32 | 0.8 | 28 | 28 | 20.602082 | 1142.677532 | 7.222316e+06 | 2687.436691 | 0.969388 |
| 126 | 1000 | 64 | 0.8 | 28 | 28 | 17.128258 | 1416.032626 | 9.845789e+06 | 3137.800065 | 0.958232 |
| 127 | 1000 | 128 | 0.8 | 28 | 28 | 19.917796 | 1561.586124 | 1.178155e+07 | 3432.425987 | 0.949968 |
| 192 | 10000 | 16 | 0.2 | 7 | 7 | 19.452132 | 1940.604828 | 1.463470e+07 | 3825.532019 | 0.940812 |
| 193 | 10000 | 16 | 0.6 | 7 | 7 | 20.288160 | 1847.217263 | 1.426324e+07 | 3776.670144 | 0.942331 |
130 rows × 10 columns
# Validation MSE across all grid-search configurations, in iteration order.
fig = px.line(
    metricasval,
    y='MSE',
    title='MSE vs Índice',
    labels={'index': 'Índice', 'MSE': 'MSE'},
)
fig.show()
En esta figura podemos observar el MSE a lo largo de las iteraciones; vemos que a medida que se aumentaron las neuronas el resultado mejoraba. Además, delimitar la búsqueda a partir de los parámetros que ya se sabía que tenían buen desempeño permitió estabilizar la métrica.
Ahora compararemos los errores en entrenamiento, validación y prueba con la métrica MAPE, porque esta representa los errores de manera porcentual.
def _aplanar_metricas(metrics_list):
    """Flatten a list of per-run metric lists into a deduplicated DataFrame.

    Same flattening used for the validation metrics above; extracted here
    because the original repeated the loop verbatim for train and test.
    """
    registros = [registro for corrida in metrics_list for registro in corrida]
    return pd.DataFrame(registros).drop_duplicates()

# Per-split metric tables (validation was already built as `metricasval`).
metricas_train = _aplanar_metricas(resultados['train_metrics_price'])
metricas_test = _aplanar_metricas(resultados['test_metrics_price'])

# Compare percentage errors (MAPE) across the three splits.
errores_train = metricas_train['MAPE']
errores_val = metricasval['MAPE']
errores_test = metricas_test['MAPE']

# One MAPE series per split, in plotting order.
residuos = [errores_train, errores_val, errores_test]

# Boxplot of the three error distributions.
plt.figure(figsize=(8, 6))
sns.boxplot(data=residuos)
plt.title('Comparación de Residuos entre 3 Listas')
plt.xlabel('Lista')
plt.ylabel('Valor del Residuo')
plt.xticks(ticks=[0, 1, 2], labels=['Residuos train', 'Residuos val', 'Residuos test'])
plt.show()
Las medianas son similares, al igual que las cajas, porque al manejar la metodología de ventanas deslizantes los conjuntos son iguales a excepción de la primera y la última ventana. Sin embargo, las medianas se van reduciendo porque el modelo aprende luego de evaluar el conjunto de validación.
Predicciones#
# Retrieve the stored best model (run index 1 → the 1000-neuron round).
mejormodelo = resultados['best_model_price'][1]

# Rebuild the same 7-day-window / 7-day-horizon splits used in the search.
X_train, Y_train, X_val, Y_val, X_test, Y_test = create_sliding_windows(datos, 7, 7)

# Predict every split with the best model.
train = mejormodelo.predict(X_train)
val = mejormodelo.predict(X_val)
test = mejormodelo.predict(X_test)
156/156 [==============================] - 2s 13ms/step
156/156 [==============================] - 2s 14ms/step
156/156 [==============================] - 2s 12ms/step
import plotly.express as px
import pandas as pd

# Long format: one record per predicted value, tagged with its source window
# ("Fila") and shifted along the x-axis so overlapping windows line up.
df = pd.DataFrame(train)
datos = [
    {"Fila": f"Fila {i + 1}", "Índice": i + 1 + j, "Valor": valor}
    for i, fila in enumerate(df.values)
    for j, valor in enumerate(fila)
]
df_plot = pd.DataFrame(datos)

fig = px.line(
    df_plot,
    x="Índice",
    y="Valor",
    color="Fila",
    markers=True,
    title="Gráfico de Filas del Array con Ventanas Superpuestas"
)
fig.show()
Aquí podemos ver la predicción de las ventanas para entrenamiento de manera independiente
# BUG FIX: the original appended the "val" and "test" records to `train_data`
# (lines building val_data/test_data were dead code, leaving both lists empty),
# so the concatenation order silently differed from the intent. Each split now
# fills its own list via a shared helper.
def _filas_prediccion(matriz, etiqueta, desplazamiento):
    """One record per predicted value; `desplazamiento` offsets the x-index
    so each split starts at its position in the original series."""
    return [
        {"Conjunto": etiqueta, "Índice": i + desplazamiento + j, "Valor": valor}
        for i, fila in enumerate(matriz)
        for j, valor in enumerate(fila)
    ]

train_data = _filas_prediccion(train, "Train", 1)
val_data = _filas_prediccion(val, "val", 8)
test_data = _filas_prediccion(test, "test", 16)

# Records for the observed price series itself.
datos_data = [
    {"Conjunto": "Datos", "Fila": "Serie Datos", "Índice": i + 1, "Valor": value}
    for i, value in enumerate(data['Price'])
]

# Combine every split and the observed series into one DataFrame.
df_plot = pd.DataFrame(train_data + datos_data + val_data + test_data)

# Plot observations and per-split predictions, colour-coded by split.
fig = px.line(
    df_plot,
    x="Índice",
    y="Valor",
    color="Conjunto",  # distinguish 'Train' / 'val' / 'test' / 'Datos'
    markers=True,
    title="Gráfico de Train y Lista Datos"
)
fig.show()
En la gráfica se pueden ver los horizontes de predicción para cada ventana. El conjunto de train es igual al de validación; sin embargo, el de prueba se mantiene con diferencia.